#include <xen/config.h>
+#include <xen/irq.h>
#include <xen/smp.h>
#include <xen/spinlock.h>
+#ifndef NDEBUG
+
+/* Global gate for the checks below: active while the counter is > 0. */
+static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
+
+/*
+ * Check consistent IRQ-safety usage of a lock: once a lock has been seen
+ * acquired with IRQs disabled it must always be acquired that way (and
+ * vice versa).  The first use latches the observed mode into
+ * debug->irq_safe (initially -1, "don't know yet"); a later mismatching
+ * use is fatal.
+ */
+static void check_lock(struct lock_debug *debug)
+{
+ /* IRQ-safe iff interrupts are disabled at this call site. */
+ int irq_safe = !local_irq_is_enabled();
+
+ /* Skip entirely while checking is globally disabled (counter <= 0). */
+ if ( unlikely(atomic_read(&spin_debug) <= 0) )
+ return;
+
+ /* A few places take liberties with this. */
+ /* BUG_ON(in_irq() && !irq_safe); */
+
+ if ( unlikely(debug->irq_safe != irq_safe) )
+ {
+ /* Atomically latch the first observed mode (-1 == unrecorded). */
+ int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
+ BUG_ON(seen == !irq_safe);
+ }
+}
+
+/* Enable lock checking (counted: pairs with spin_debug_disable()). */
+void spin_debug_enable(void)
+{
+ atomic_inc(&spin_debug);
+}
+
+/* Disable lock checking (counted: pairs with spin_debug_enable()). */
+void spin_debug_disable(void)
+{
+ atomic_dec(&spin_debug);
+}
+
+#else /* defined(NDEBUG) */
+
+#define check_lock(l) ((void)0)
+
+#endif
+
void _spin_lock(spinlock_t *lock)
{
+ /* Verify IRQ-safety discipline before spinning on the raw lock. */
+ check_lock(&lock->debug);
_raw_spin_lock(&lock->raw);
}
{
ASSERT(local_irq_is_enabled());
local_irq_disable();
+ check_lock(&lock->debug);
_raw_spin_lock(&lock->raw);
}
{
unsigned long flags;
local_irq_save(flags);
+ check_lock(&lock->debug);
_raw_spin_lock(&lock->raw);
return flags;
}
int _spin_is_locked(spinlock_t *lock)
{
+ /* Even read-only queries of the lock are subject to the check. */
+ check_lock(&lock->debug);
return _raw_spin_is_locked(&lock->raw);
}
int _spin_trylock(spinlock_t *lock)
{
+ /* Trylock observes the same IRQ-safety discipline as plain lock. */
+ check_lock(&lock->debug);
return _raw_spin_trylock(&lock->raw);
}
void _spin_barrier(spinlock_t *lock)
{
+ check_lock(&lock->debug);
/* Spin until any current holder has dropped the lock; mb() orders the
 * polling reads and the final observation against surrounding accesses. */
do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
mb();
}
/* Don't allow overflow of recurse_cpu field. */
BUILD_BUG_ON(NR_CPUS > 0xfffu);
+ check_lock(&lock->debug);
+
if ( likely(lock->recurse_cpu != cpu) )
{
spin_lock(lock);
void _read_lock(rwlock_t *lock)
{
+ /* rwlocks share the same per-lock IRQ-safety check as spinlocks. */
+ check_lock(&lock->debug);
_raw_read_lock(&lock->raw);
}
{
ASSERT(local_irq_is_enabled());
local_irq_disable();
+ check_lock(&lock->debug);
_raw_read_lock(&lock->raw);
}
{
unsigned long flags;
local_irq_save(flags);
+ check_lock(&lock->debug);
_raw_read_lock(&lock->raw);
return flags;
}
void _write_lock(rwlock_t *lock)
{
+ /* rwlocks share the same per-lock IRQ-safety check as spinlocks. */
+ check_lock(&lock->debug);
_raw_write_lock(&lock->raw);
}
{
ASSERT(local_irq_is_enabled());
local_irq_disable();
+ check_lock(&lock->debug);
_raw_write_lock(&lock->raw);
}
{
unsigned long flags;
local_irq_save(flags);
+ check_lock(&lock->debug);
_raw_write_lock(&lock->raw);
return flags;
}
#include <asm/system.h>
#include <asm/spinlock.h>
+#ifndef NDEBUG
+/* Per-lock debug state, present only in debug builds. */
+struct lock_debug {
+ int irq_safe; /* +1: IRQ-safe; 0: not IRQ-safe; -1: don't know yet */
+};
+#define _LOCK_DEBUG { -1 }
+/* Counted global enable/disable of lock checking. */
+void spin_debug_enable(void);
+void spin_debug_disable(void);
+#else
+/* Release builds: empty state and no-op stubs keep call sites unchanged. */
+struct lock_debug { };
+#define _LOCK_DEBUG { }
+#define spin_debug_enable() ((void)0)
+#define spin_debug_disable() ((void)0)
+#endif
+
typedef struct {
raw_spinlock_t raw;
u16 recurse_cpu:12;
u16 recurse_cnt:4;
+ /* Debug bookkeeping; zero-size in NDEBUG builds. */
+ struct lock_debug debug;
} spinlock_t;
-#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0 }
+
+/* 0xfffu == "no owner" sentinel for the 12-bit recurse_cpu field. */
+#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0, _LOCK_DEBUG }
#define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
typedef struct {
raw_rwlock_t raw;
+ /* Debug bookkeeping; zero-size in NDEBUG builds. */
+ struct lock_debug debug;
} rwlock_t;
-#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED }
+#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED, _LOCK_DEBUG }
#define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
#define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)